memcpy(&v->arch.guest_context, c.nat, sizeof(*c.nat));
#ifdef CONFIG_COMPAT
else
- {
XLAT_vcpu_guest_context(&v->arch.guest_context, c.cmp);
- }
#endif
+ v->arch.guest_context.user_regs.eflags |= 2;
+
/* Only CR0.TS is modifiable by guest or admin. */
v->arch.guest_context.ctrlreg[0] &= X86_CR0_TS;
v->arch.guest_context.ctrlreg[0] |= read_cr0() & ~X86_CR0_TS;
/* Ensure real hardware interrupts are enabled. */
v->arch.guest_context.user_regs.eflags |= EF_IE;
}
- else
- {
- hvm_load_cpu_guest_regs(v, &v->arch.guest_context.user_regs);
- }
if ( v->is_initialised )
goto out;
{
if ( !is_pv_32on64_domain(v->domain) )
{
- hvm_store_cpu_guest_regs(v, &c.nat->user_regs);
memset(c.nat->ctrlreg, 0, sizeof(c.nat->ctrlreg));
c.nat->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
c.nat->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
#ifdef CONFIG_COMPAT
else
{
- struct cpu_user_regs user_regs;
- unsigned i;
-
- hvm_store_cpu_guest_regs(v, &user_regs);
- XLAT_cpu_user_regs(&c.cmp->user_regs, &user_regs);
memset(c.cmp->ctrlreg, 0, sizeof(c.cmp->ctrlreg));
c.cmp->ctrlreg[0] = v->arch.hvm_vcpu.guest_cr[0];
c.cmp->ctrlreg[2] = v->arch.hvm_vcpu.guest_cr[2];
ctxt.rbp = vc->user_regs.ebp;
ctxt.rsi = vc->user_regs.esi;
ctxt.rdi = vc->user_regs.edi;
- /* %rsp handled by arch-specific call above */
-#ifdef __x86_64__
+ ctxt.rsp = vc->user_regs.esp;
+ ctxt.rip = vc->user_regs.eip;
+ ctxt.rflags = vc->user_regs.eflags;
+#ifdef __x86_64__
ctxt.r8 = vc->user_regs.r8;
ctxt.r9 = vc->user_regs.r9;
ctxt.r10 = vc->user_regs.r10;
vc->user_regs.esi = ctxt.rsi;
vc->user_regs.edi = ctxt.rdi;
vc->user_regs.esp = ctxt.rsp;
+ vc->user_regs.eip = ctxt.rip;
+ vc->user_regs.eflags = ctxt.rflags | 2;
#ifdef __x86_64__
vc->user_regs.r8 = ctxt.r8;
vc->user_regs.r9 = ctxt.r9;
goto out;
}
- hvm_store_cpu_guest_regs(v, regs);
-
ptss = hvm_map(prev_tr.base, sizeof(tss));
if ( ptss == NULL )
goto out;
hvm_copy_to_guest_virt(linear_addr, &errcode, 4);
}
- hvm_load_cpu_guest_regs(v, regs);
-
out:
hvm_unmap(optss_desc);
hvm_unmap(nptss_desc);
#endif
case 4:
case 2:
- hvm_store_cpu_guest_regs(current, regs);
if ( unlikely(ring_3(regs)) )
{
default:
/* Copy register changes back into current guest state. */
regs->eflags &= ~X86_EFLAGS_RF;
- hvm_load_cpu_guest_regs(v, regs);
memcpy(guest_cpu_user_regs(), regs, HVM_CONTEXT_STACK_BYTES);
out:
/* Copy current guest state into io instruction state structure. */
memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
- hvm_store_cpu_guest_regs(v, regs);
df = regs->eflags & X86_EFLAGS_DF ? 1 : 0;
#define DECODE_SIB_BASE(prefix, sib) DECODE_MODRM_RM(prefix, sib)
-static inline unsigned long DECODE_GPR_VALUE(struct vmcb_struct *vmcb,
- struct cpu_user_regs *regs, u8 gpr_rm)
+static inline unsigned long DECODE_GPR_VALUE(
+ struct cpu_user_regs *regs, u8 gpr_rm)
{
unsigned long value;
switch (gpr_rm)
value = regs->ebx;
break;
case 0x4:
- value = (unsigned long)vmcb->rsp;
+ value = regs->esp;
case 0x5:
value = regs->ebp;
break;
}
else
{
- effective_addr = DECODE_GPR_VALUE(vmcb, regs, modrm_rm);
+ effective_addr = DECODE_GPR_VALUE(regs, modrm_rm);
}
break;
#if __x86_64__
/* 64-bit mode */
if (vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v))
- return vmcb->rip + inst_len + *size + disp;
+ return regs->eip + inst_len + *size + disp;
#endif
return disp;
default:
- effective_addr = DECODE_GPR_VALUE(vmcb, regs, modrm_rm);
+ effective_addr = DECODE_GPR_VALUE(regs, modrm_rm);
}
sib_idx = DECODE_SIB_INDEX(prefix, sib);
sib_base = DECODE_SIB_BASE(prefix, sib);
- base = DECODE_GPR_VALUE(vmcb, regs, sib_base);
+ base = DECODE_GPR_VALUE(regs, sib_base);
if ((unsigned long)-1 == base)
{
if (4 == sib_idx)
return base;
- effective_addr = DECODE_GPR_VALUE(vmcb, regs, sib_idx);
+ effective_addr = DECODE_GPR_VALUE(regs, sib_idx);
effective_addr <<= sib_scale;
* no matter what kind of addressing is used.
*/
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- unsigned long p = vmcb->cs.base + vmcb->rip;
+ unsigned long p = vmcb->cs.base + guest_cpu_user_regs()->eip;
+ ASSERT(v == current);
if (!(vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v)))
return (u32)p; /* mask to 32 bits */
/* NB. Should mask to 16 bits if in real mode or 16-bit protected mode. */
/* hardware assisted paging bits */
extern int opt_hap_enabled;
+static void inline __update_guest_eip(
+ struct cpu_user_regs *regs, int inst_len)
+{
+ ASSERT(inst_len > 0);
+ regs->eip += inst_len;
+ regs->eflags &= ~X86_EFLAGS_RF;
+}
+
static void svm_inject_exception(
struct vcpu *v, int trap, int ev, int error_code)
{
#endif
}
-static void svm_store_cpu_guest_regs(
- struct vcpu *v, struct cpu_user_regs *regs)
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- regs->esp = vmcb->rsp;
- regs->eflags = vmcb->rflags;
- regs->eip = vmcb->rip;
-}
-
static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
{
u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
{
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- c->rip = vmcb->rip;
-
-#ifdef HVM_DEBUG_SUSPEND
- printk("%s: eip=0x%"PRIx64".\n",
- __func__,
- inst_len, c->eip);
-#endif
-
- c->rsp = vmcb->rsp;
- c->rflags = vmcb->rflags;
-
c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
-#ifdef HVM_DEBUG_SUSPEND
- printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
- __func__,
- c->cr3,
- c->cr0,
- c->cr4);
-#endif
-
c->idtr_limit = vmcb->idtr.limit;
c->idtr_base = vmcb->idtr.base;
v->arch.guest_table = pagetable_from_pfn(mfn);
}
- vmcb->rip = c->rip;
- vmcb->rsp = c->rsp;
- vmcb->rflags = c->rflags;
-
v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
return !vmcb->interrupt_shadow;
ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
- return !irq_masked(vmcb->rflags) && !vmcb->interrupt_shadow;
+ return (!irq_masked(guest_cpu_user_regs()->eflags) &&
+ !vmcb->interrupt_shadow);
}
static int svm_guest_x86_mode(struct vcpu *v)
if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
return 0;
- if ( unlikely(vmcb->rflags & X86_EFLAGS_VM) )
+ if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
return 8;
*/
svm_reset_to_realmode(v, regs);
/* Adjust the vmcb's hidden register state. */
- vmcb->rip = 0;
vmcb->cs.sel = cs_sel;
vmcb->cs.base = (cs_sel << 4);
}
*(u16 *)(hypercall_page + (__HYPERVISOR_iret * 32)) = 0x0b0f; /* ud2 */
}
-static void svm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
-{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
- vmcb->rsp = regs->esp;
- vmcb->rflags = regs->eflags | 2UL;
- vmcb->rip = regs->eip;
-}
-
static void svm_ctxt_switch_from(struct vcpu *v)
{
int cpu = smp_processor_id();
.domain_destroy = svm_domain_destroy,
.vcpu_initialise = svm_vcpu_initialise,
.vcpu_destroy = svm_vcpu_destroy,
- .store_cpu_guest_regs = svm_store_cpu_guest_regs,
- .load_cpu_guest_regs = svm_load_cpu_guest_regs,
.save_cpu_ctxt = svm_save_vmcb_ctxt,
.load_cpu_ctxt = svm_load_vmcb_ctxt,
.interrupts_enabled = svm_interrupts_enabled,
inst_len = __get_instruction_length(v, INSTR_CPUID, NULL);
ASSERT(inst_len > 0);
- __update_guest_eip(vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
}
static unsigned long *get_reg_p(
reg_p = (unsigned long *)&regs->ebp;
break;
case SVM_REG_ESP:
- reg_p = (unsigned long *)&vmcb->rsp;
+ reg_p = (unsigned long *)&regs->esp;
break;
#ifdef __x86_64__
case SVM_REG_R8:
* than one byte (+ maybe rep-prefix), we have some prefix so we need
* to figure out what it is...
*/
- isize = vmcb->exitinfo2 - vmcb->rip;
+ isize = vmcb->exitinfo2 - regs->eip;
if (info.fields.rep)
isize --;
/* Copy current guest state into io instruction state structure. */
memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
- svm_store_cpu_guest_regs(v, regs);
info.bytes = vmcb->exitinfo1;
HVM_DBG_LOG(DBG_LEVEL_IO,
"svm_io_instruction: port 0x%x eip=%x:%"PRIx64", "
"exit_qualification = %"PRIx64,
- port, vmcb->cs.sel, vmcb->rip, info.bytes);
+ port, vmcb->cs.sel, (uint64_t)regs->eip, info.bytes);
/* string instruction */
if (info.fields.str)
if (index > 0 && (buffer[index-1] & 0xF0) == 0x40)
prefix = buffer[index-1];
- HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long) vmcb->rip);
+ HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long)regs->eip);
switch ( match )
ASSERT(inst_len);
if ( result )
- __update_guest_eip(vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
}
static void svm_do_msr_access(
inst_len = __get_instruction_length(v, INSTR_WRMSR, NULL);
}
- __update_guest_eip(vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
}
-static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb)
+static void svm_vmexit_do_hlt(struct vmcb_struct *vmcb,
+ struct cpu_user_regs *regs)
{
enum hvm_intack type = hvm_vcpu_has_pending_irq(current);
- __update_guest_eip(vmcb, 1);
+ __update_guest_eip(regs, 1);
/* Check for interrupt not handled or new interrupt. */
if ( vmcb->eventinj.fields.v ||
}
HVMTRACE_1D(HLT, current, /*int pending=*/ 0);
- hvm_hlt(vmcb->rflags);
+ hvm_hlt(regs->eflags);
}
-static void svm_vmexit_do_invd(struct vcpu *v)
+static void svm_vmexit_do_invd(struct cpu_user_regs *regs)
{
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
- int inst_len;
+ int inst_len;
/* Invalidate the cache - we can't really do that safely - maybe we should
* WBINVD, but I think it's just fine to completely ignore it - we should
*/
gdprintk(XENLOG_WARNING, "INVD instruction intercepted - ignored\n");
- inst_len = __get_instruction_length(v, INSTR_INVD, NULL);
- __update_guest_eip(vmcb, inst_len);
+ inst_len = __get_instruction_length(current, INSTR_INVD, NULL);
+ __update_guest_eip(regs, inst_len);
}
void svm_handle_invlpg(const short invlpga, struct cpu_user_regs *regs)
u8 opcode[MAX_INST_LEN], prefix, length = MAX_INST_LEN;
unsigned long g_vaddr;
int inst_len;
- struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
/*
* Unknown how many bytes the invlpg instruction will take. Use the
{
inst_len = __get_instruction_length(v, INSTR_INVLPGA, opcode);
ASSERT(inst_len > 0);
- __update_guest_eip(vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
/*
* The address is implicit on this instruction. At the moment, we don't
&opcode[inst_len], &length);
inst_len += length;
- __update_guest_eip (vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
}
HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
memset(regs, 0, sizeof(struct cpu_user_regs));
+ regs->eflags = 2;
+
v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
svm_update_guest_cr(v, 0);
vmcb->efer = EFER_SVME;
/* This will jump to ROMBIOS */
- vmcb->rip = 0xFFF0;
+ regs->eip = 0xFFF0;
/* Set up the segment registers and all their hidden states. */
vmcb->cs.sel = 0xF000;
vmcb->idtr.limit = 0x3ff;
vmcb->idtr.base = 0x00;
- vmcb->rax = 0;
- vmcb->rsp = 0;
-
return 0;
}
asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
{
unsigned int exit_reason;
- unsigned long eip;
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
eventinj_t eventinj;
exit_reason = vmcb->exitcode;
- HVMTRACE_2D(VMEXIT, v, vmcb->rip, exit_reason);
+ HVMTRACE_2D(VMEXIT, v, regs->eip, exit_reason);
if ( unlikely(exit_reason == VMEXIT_INVALID) )
{
}
perfc_incra(svmexits, exit_reason);
- eip = vmcb->rip;
/* Event delivery caused this intercept? Queue for redelivery. */
eventinj = vmcb->exitintinfo;
goto exit_and_crash;
/* AMD Vol2, 15.11: INT3, INTO, BOUND intercepts do not update RIP. */
inst_len = __get_instruction_length(v, INSTR_INT3, NULL);
- __update_guest_eip(vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
domain_pause_for_debugger();
break;
case VMEXIT_EXCEPTION_MC:
HVMTRACE_0D(MCE, v);
- svm_store_cpu_guest_regs(v, regs);
do_machine_check(regs);
break;
break;
case VMEXIT_INVD:
- svm_vmexit_do_invd(v);
+ svm_vmexit_do_invd(regs);
break;
case VMEXIT_TASK_SWITCH: {
break;
case VMEXIT_HLT:
- svm_vmexit_do_hlt(vmcb);
+ svm_vmexit_do_hlt(vmcb, regs);
break;
case VMEXIT_INVLPG:
rc = hvm_do_hypercall(regs);
if ( rc != HVM_HCALL_preempted )
{
- __update_guest_eip(vmcb, inst_len);
+ __update_guest_eip(regs, inst_len);
if ( rc == HVM_HCALL_invalidate )
send_invalidate_req();
}
movl VCPU_svm_vmcb(%ebx),%ecx
movl UREGS_eax(%esp),%eax
movl %eax,VMCB_rax(%ecx)
+ movl UREGS_eip(%esp),%eax
+ movl %eax,VMCB_rip(%ecx)
+ movl UREGS_esp(%esp),%eax
+ movl %eax,VMCB_rsp(%ecx)
+ movl UREGS_eflags(%esp),%eax
+ movl %eax,VMCB_rflags(%ecx)
movl VCPU_svm_vmcb_pa(%ebx),%eax
popl %ebx
movl VCPU_svm_vmcb(%ebx),%ecx
movl VMCB_rax(%ecx),%eax
movl %eax,UREGS_eax(%esp)
+ movl VMCB_rip(%ecx),%eax
+ movl %eax,UREGS_eip(%esp)
+ movl VMCB_rsp(%ecx),%eax
+ movl %eax,UREGS_esp(%esp)
+ movl VMCB_rflags(%ecx),%eax
+ movl %eax,UREGS_eflags(%esp)
STGI
.globl svm_stgi_label;
movq VCPU_svm_vmcb(%rbx),%rcx
movq UREGS_rax(%rsp),%rax
movq %rax,VMCB_rax(%rcx)
+ movq UREGS_rip(%rsp),%rax
+ movq %rax,VMCB_rip(%rcx)
+ movq UREGS_rsp(%rsp),%rax
+ movq %rax,VMCB_rsp(%rcx)
+ movq UREGS_eflags(%rsp),%rax
+ movq %rax,VMCB_rflags(%rcx)
movq VCPU_svm_vmcb_pa(%rbx),%rax
popq %r15
movq VCPU_svm_vmcb(%rbx),%rcx
movq VMCB_rax(%rcx),%rax
movq %rax,UREGS_rax(%rsp)
+ movq VMCB_rip(%rcx),%rax
+ movq %rax,UREGS_rip(%rsp)
+ movq VMCB_rsp(%rcx),%rax
+ movq %rax,UREGS_rsp(%rsp)
+ movq VMCB_rflags(%rcx),%rax
+ movq %rax,UREGS_eflags(%rsp)
STGI
.globl svm_stgi_label;
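The movl/movq pairs added to the 32-bit and 64-bit SVM exit stubs above are what keep guest_cpu_user_regs() authoritative: the VMCB is refreshed from the stack snapshot just before VMRUN, and the snapshot is refreshed from the VMCB right after #VMEXIT, before STGI re-enables interrupts. In C terms the round trip amounts to the sketch below (illustration only; the helper names are invented, the real copies stay in exits.S, and the vmcb_struct/cpu_user_regs declarations already visible in svm.c are assumed to be in scope):

static inline void svm_vmentry_sync(struct vmcb_struct *vmcb,
                                    const struct cpu_user_regs *regs)
{
    /* Refresh the VMCB from the canonical snapshot before VMRUN. */
    vmcb->rax    = regs->eax;
    vmcb->rip    = regs->eip;
    vmcb->rsp    = regs->esp;
    vmcb->rflags = regs->eflags;
}

static inline void svm_vmexit_sync(struct cpu_user_regs *regs,
                                   const struct vmcb_struct *vmcb)
{
    /* Refresh the canonical snapshot from the VMCB after #VMEXIT. */
    regs->eax    = vmcb->rax;
    regs->eip    = vmcb->rip;
    regs->esp    = vmcb->rsp;
    regs->eflags = vmcb->rflags;
}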
{
unsigned int cs_ar_bytes;
- ASSERT(v == current);
-
if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
return 0;
- if ( unlikely(__vmread(GUEST_RFLAGS) & X86_EFLAGS_VM) )
+ if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
return 1;
cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
if ( hvm_long_mode_enabled(v) &&
vmx_vmcs_enter(v);
- c->rip = __vmread(GUEST_RIP);
- c->rsp = __vmread(GUEST_RSP);
- c->rflags = __vmread(GUEST_RFLAGS);
-
c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
c->msr_efer = v->arch.hvm_vcpu.guest_efer;
-#ifdef HVM_DEBUG_SUSPEND
- printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
- __func__, c->cr3, c->cr0, c->cr4);
-#endif
-
c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
c->idtr_base = __vmread(GUEST_IDTR_BASE);
vmx_vmcs_enter(v);
- __vmwrite(GUEST_RIP, c->rip);
- __vmwrite(GUEST_RSP, c->rsp);
- __vmwrite(GUEST_RFLAGS, c->rflags);
-
v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
vmx_restore_dr(v);
}
-static void vmx_store_cpu_guest_regs(
- struct vcpu *v, struct cpu_user_regs *regs)
-{
- vmx_vmcs_enter(v);
-
- regs->eflags = __vmread(GUEST_RFLAGS);
- regs->eip = __vmread(GUEST_RIP);
- regs->esp = __vmread(GUEST_RSP);
-
- vmx_vmcs_exit(v);
-}
-
-static void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
-{
- vmx_vmcs_enter(v);
-
- /* NB. Bit 1 of RFLAGS must be set for VMENTRY to succeed. */
- __vmwrite(GUEST_RFLAGS, regs->eflags | 2UL);
- __vmwrite(GUEST_RIP, regs->eip);
- __vmwrite(GUEST_RSP, regs->esp);
-
- vmx_vmcs_exit(v);
-}
-
static unsigned long vmx_get_segment_base(struct vcpu *v, enum x86_segment seg)
{
unsigned long base = 0;
static int vmx_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
{
- unsigned long intr_shadow, eflags;
-
- ASSERT(v == current);
+ unsigned long intr_shadow;
intr_shadow = __vmread(GUEST_INTERRUPTIBILITY_INFO);
VMX_INTR_SHADOW_NMI));
ASSERT((type == hvm_intack_pic) || (type == hvm_intack_lapic));
- eflags = __vmread(GUEST_RFLAGS);
- return (!irq_masked(eflags) &&
+ return (!irq_masked(guest_cpu_user_regs()->eflags) &&
!(intr_shadow & (VMX_INTR_SHADOW_STI|VMX_INTR_SHADOW_MOV_SS)));
}
.domain_destroy = vmx_domain_destroy,
.vcpu_initialise = vmx_vcpu_initialise,
.vcpu_destroy = vmx_vcpu_destroy,
- .store_cpu_guest_regs = vmx_store_cpu_guest_regs,
- .load_cpu_guest_regs = vmx_load_cpu_guest_regs,
.save_cpu_ctxt = vmx_save_vmcs_ctxt,
.load_cpu_ctxt = vmx_load_vmcs_ctxt,
.interrupts_enabled = vmx_interrupts_enabled,
static void __update_guest_eip(unsigned long inst_len)
{
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned long x;
- x = __vmread(GUEST_RIP);
- __vmwrite(GUEST_RIP, x + inst_len);
-
- x = __vmread(GUEST_RFLAGS);
- if ( x & X86_EFLAGS_RF )
- __vmwrite(GUEST_RFLAGS, x & ~X86_EFLAGS_RF);
+ regs->eip += inst_len;
+ regs->eflags &= ~X86_EFLAGS_RF;
x = __vmread(GUEST_INTERRUPTIBILITY_INFO);
if ( x & (VMX_INTR_SHADOW_STI | VMX_INTR_SHADOW_MOV_SS) )
*/
static void vmx_do_invlpg(unsigned long va)
{
- unsigned long eip;
struct vcpu *v = current;
HVMTRACE_2D(INVLPG, v, /*invlpga=*/ 0, va);
- eip = __vmread(GUEST_RIP);
-
- HVM_DBG_LOG(DBG_LEVEL_VMMU, "eip=%lx, va=%lx",
- eip, va);
-
/*
* We do the safest things first, then try to update the shadow
* copying from guest
/* Copy current guest state into io instruction state structure. */
memcpy(regs, guest_cpu_user_regs(), HVM_CONTEXT_STACK_BYTES);
- vmx_store_cpu_guest_regs(current, regs);
HVM_DBG_LOG(DBG_LEVEL_IO, "vm86 %d, eip=%x:%lx, "
"exit_qualification = %lx",
static void vmx_world_save(struct vcpu *v, struct vmx_assist_context *c)
{
- /* NB. Skip transition instruction. */
- c->eip = __vmread(GUEST_RIP);
- c->eip += __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
- c->esp = __vmread(GUEST_RSP);
- c->eflags = __vmread(GUEST_RFLAGS) & ~X86_EFLAGS_RF;
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
+
+ c->eip = regs->eip;
+ c->eip += __get_instruction_length(); /* Safe: MOV Cn, LMSW, CLTS */
+ c->esp = regs->esp;
+ c->eflags = regs->eflags & ~X86_EFLAGS_RF;
c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
{
+ struct cpu_user_regs *regs = guest_cpu_user_regs();
unsigned long mfn = 0;
p2m_type_t p2mt;
v->arch.guest_table = pagetable_from_pfn(mfn);
- __vmwrite(GUEST_RIP, c->eip);
- __vmwrite(GUEST_RSP, c->esp);
- __vmwrite(GUEST_RFLAGS, c->eflags);
+ regs->eip = c->eip;
+ regs->esp = c->esp;
+ regs->eflags = c->eflags | 2;
v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
static int vmx_set_cr0(unsigned long value)
{
struct vcpu *v = current;
- unsigned long eip;
int rc = hvm_set_cr0(value);
if ( rc == 0 )
if ( !(value & X86_CR0_PE) )
{
if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
- {
- eip = __vmread(GUEST_RIP);
- HVM_DBG_LOG(DBG_LEVEL_1,
- "Transfering control to vmxassist %%eip 0x%lx", eip);
return 0; /* do not update eip! */
- }
}
else if ( v->arch.hvm_vmx.vmxassist_enabled )
{
- eip = __vmread(GUEST_RIP);
- HVM_DBG_LOG(DBG_LEVEL_1,
- "Enabling CR0.PE at %%eip 0x%lx", eip);
if ( vmx_assist(v, VMX_ASSIST_RESTORE) )
- {
- HVM_DBG_LOG(DBG_LEVEL_1,
- "Restoring to %%eip 0x%lx", eip);
return 0; /* do not update eip! */
- }
}
return 1;
CASE_GET_REG(EBP, ebp);
CASE_GET_REG(ESI, esi);
CASE_GET_REG(EDI, edi);
+ CASE_GET_REG(ESP, esp);
CASE_EXTEND_GET_REG;
- case REG_ESP:
- value = __vmread(GUEST_RSP);
- break;
default:
gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
goto exit_and_crash;
CASE_SET_REG(EBP, ebp);
CASE_SET_REG(ESI, esi);
CASE_SET_REG(EDI, edi);
+ CASE_SET_REG(ESP, esp);
CASE_EXTEND_SET_REG;
- case REG_ESP:
- __vmwrite(GUEST_RSP, value);
- regs->esp = value;
- break;
default:
printk("invalid gp: %d\n", gp);
domain_crash(v->domain);
return 0;
}
-static void vmx_do_hlt(void)
+static void vmx_do_hlt(struct cpu_user_regs *regs)
{
- unsigned long rflags;
HVMTRACE_0D(HLT, current);
- rflags = __vmread(GUEST_RFLAGS);
- hvm_hlt(rflags);
+ hvm_hlt(regs->eflags);
}
static void vmx_do_extint(struct cpu_user_regs *regs)
case EXIT_REASON_MACHINE_CHECK:
printk("caused by machine check.\n");
HVMTRACE_0D(MCE, current);
- vmx_store_cpu_guest_regs(current, regs);
do_machine_check(regs);
break;
default:
exit_reason = __vmread(VM_EXIT_REASON);
- HVMTRACE_2D(VMEXIT, v, __vmread(GUEST_RIP), exit_reason);
+ HVMTRACE_2D(VMEXIT, v, regs->eip, exit_reason);
perfc_incra(vmexits, exit_reason);
(X86_EVENTTYPE_NMI << 8) )
goto exit_and_crash;
HVMTRACE_0D(NMI, v);
- vmx_store_cpu_guest_regs(v, regs);
do_nmi(regs); /* Real NMI, vector 2: normal processing. */
break;
case TRAP_machine_check:
HVMTRACE_0D(MCE, v);
- vmx_store_cpu_guest_regs(v, regs);
do_machine_check(regs);
break;
default:
case EXIT_REASON_HLT:
inst_len = __get_instruction_length(); /* Safe: HLT */
__update_guest_eip(inst_len);
- vmx_do_hlt();
+ vmx_do_hlt(regs);
break;
case EXIT_REASON_INVLPG:
{
#include <asm/page.h>
#include <public/xen.h>
+#define VMRESUME .byte 0x0f,0x01,0xc3
+#define VMLAUNCH .byte 0x0f,0x01,0xc2
+#define VMREAD(off) .byte 0x0f,0x78,0x44,0x24,off
+#define VMWRITE(off) .byte 0x0f,0x79,0x44,0x24,off
+
+/* VMCS field encodings */
+#define GUEST_RSP 0x681c
+#define GUEST_RIP 0x681e
+#define GUEST_RFLAGS 0x6820
+
#define GET_CURRENT(reg) \
movl $STACK_SIZE-4, reg; \
orl %esp, reg; \
ALIGN
ENTRY(vmx_asm_vmexit_handler)
HVM_SAVE_ALL_NOSEGREGS
+
+ movl $GUEST_RIP,%eax
+ VMREAD(UREGS_eip)
+ movl $GUEST_RSP,%eax
+ VMREAD(UREGS_esp)
+ movl $GUEST_RFLAGS,%eax
+ VMREAD(UREGS_eflags)
+
movl %esp,%eax
push %eax
call vmx_vmexit_handler
movl %eax,%cr2
call vmx_trace_vmentry
+ movl $GUEST_RIP,%eax
+ VMWRITE(UREGS_eip)
+ movl $GUEST_RSP,%eax
+ VMWRITE(UREGS_esp)
+ movl $GUEST_RFLAGS,%eax
+ VMWRITE(UREGS_eflags)
+
cmpl $0,VCPU_vmx_launched(%ebx)
je vmx_launch
/*vmx_resume:*/
HVM_RESTORE_ALL_NOSEGREGS
- /* VMRESUME */
- .byte 0x0f,0x01,0xc3
+ VMRESUME
pushf
call vm_resume_fail
ud2
vmx_launch:
movl $1,VCPU_vmx_launched(%ebx)
HVM_RESTORE_ALL_NOSEGREGS
- /* VMLAUNCH */
- .byte 0x0f,0x01,0xc2
+ VMLAUNCH
pushf
call vm_launch_fail
ud2
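The VMREAD(off)/VMWRITE(off) macros used above are hand-assembled because older assemblers do not know the VMX mnemonics. Both encode a memory operand of the form off(%esp) (ModRM 0x44, SIB 0x24, 8-bit displacement) with the VMCS field encoding taken from %eax, which the callers load with $GUEST_RIP, $GUEST_RSP or $GUEST_RFLAGS (0x681e, 0x681c, 0x6820). The small C program below is illustration only (the 0x30 offset is a made-up stand-in for an assembler-generated UREGS_* constant) and simply reproduces the byte pattern the macros emit:

#include <stdint.h>
#include <stdio.h>

static void show_vmx_access(const char *name, uint8_t opcode, uint8_t off)
{
    /* 0x0f,0x78 = VMREAD r/m32,r32; 0x0f,0x79 = VMWRITE r32,r/m32.
     * 0x44 0x24 off = disp8(%esp) memory operand; register operand = %eax. */
    uint8_t bytes[5] = { 0x0f, opcode, 0x44, 0x24, off };
    printf("%s(0x%02x):", name, off);
    for (int i = 0; i < 5; i++)
        printf(" %02x", bytes[i]);
    printf("\n");
}

int main(void)
{
    show_vmx_access("VMREAD",  0x78, 0x30);
    show_vmx_access("VMWRITE", 0x79, 0x30);
    return 0;
}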
#include <asm/page.h>
#include <public/xen.h>
+#define VMRESUME .byte 0x0f,0x01,0xc3
+#define VMLAUNCH .byte 0x0f,0x01,0xc2
+#define VMREAD(off) .byte 0x0f,0x78,0x44,0x24,off
+#define VMWRITE(off) .byte 0x0f,0x79,0x44,0x24,off
+
+/* VMCS field encodings */
+#define GUEST_RSP 0x681c
+#define GUEST_RIP 0x681e
+#define GUEST_RFLAGS 0x6820
+
#define GET_CURRENT(reg) \
movq $STACK_SIZE-8, reg; \
orq %rsp, reg; \
ALIGN
ENTRY(vmx_asm_vmexit_handler)
HVM_SAVE_ALL_NOSEGREGS
+
+ movl $GUEST_RIP,%eax
+ VMREAD(UREGS_rip)
+ movl $GUEST_RSP,%eax
+ VMREAD(UREGS_rsp)
+ movl $GUEST_RFLAGS,%eax
+ VMREAD(UREGS_eflags)
+
movq %rsp,%rdi
call vmx_vmexit_handler
jmp vmx_asm_do_vmentry
movq %rax,%cr2
call vmx_trace_vmentry
+ movl $GUEST_RIP,%eax
+ VMWRITE(UREGS_rip)
+ movl $GUEST_RSP,%eax
+ VMWRITE(UREGS_rsp)
+ movl $GUEST_RFLAGS,%eax
+ VMWRITE(UREGS_eflags)
+
cmpl $0,VCPU_vmx_launched(%rbx)
je vmx_launch
/*vmx_resume:*/
HVM_RESTORE_ALL_NOSEGREGS
- /* VMRESUME */
- .byte 0x0f,0x01,0xc3
+ VMRESUME
pushfq
call vm_resume_fail
ud2
vmx_launch:
movl $1,VCPU_vmx_launched(%rbx)
HVM_RESTORE_ALL_NOSEGREGS
- /* VMLAUNCH */
- .byte 0x0f,0x01,0xc2
+ VMLAUNCH
pushfq
call vm_launch_fail
ud2
sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
goto done;
}
-
- hvm_store_cpu_guest_regs(v, regs);
}
SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n",
}
#endif /* PAE guest */
- /* Emulator has changed the user registers: write back */
- if ( is_hvm_domain(d) )
- hvm_load_cpu_guest_regs(v, regs);
-
SHADOW_PRINTK("emulated\n");
return EXCRET_fault_fixed;
(regs->eip == (unsigned long)svm_stgi_label)) {
/* SVM guest was running when NMI occurred */
ASSERT(is_hvm_vcpu(v));
- hvm_store_cpu_guest_regs(v, guest_regs);
eip = guest_regs->eip;
mode = xenoprofile_get_mode(v, guest_regs);
} else {
BLANK();
OFFSET(VMCB_rax, struct vmcb_struct, rax);
+ OFFSET(VMCB_rip, struct vmcb_struct, rip);
+ OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
+ OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
BLANK();
OFFSET(VCPUINFO_upcall_pending, vcpu_info_t, evtchn_upcall_pending);
{
struct segment_register sreg;
context = "hvm";
- hvm_store_cpu_guest_regs(v, &fault_regs);
fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
BLANK();
OFFSET(VMCB_rax, struct vmcb_struct, rax);
+ OFFSET(VMCB_rip, struct vmcb_struct, rip);
+ OFFSET(VMCB_rsp, struct vmcb_struct, rsp);
+ OFFSET(VMCB_rflags, struct vmcb_struct, rflags);
BLANK();
OFFSET(VCPUINFO_upcall_pending, struct vcpu_info, evtchn_upcall_pending);
{
struct segment_register sreg;
context = "hvm";
- hvm_store_cpu_guest_regs(v, &fault_regs);
fault_crs[0] = v->arch.hvm_vcpu.guest_cr[0];
fault_crs[2] = v->arch.hvm_vcpu.guest_cr[2];
fault_crs[3] = v->arch.hvm_vcpu.guest_cr[3];
#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__
+#include <asm/current.h>
#include <asm/x86_emulate.h>
#include <public/domctl.h>
#include <public/hvm/save.h>
int (*vcpu_initialise)(struct vcpu *v);
void (*vcpu_destroy)(struct vcpu *v);
- /*
- * Store and load guest state:
- * 1) load/store guest register state,
- * 2) modify guest state (e.g., set debug flags).
- */
- void (*store_cpu_guest_regs)(
- struct vcpu *v, struct cpu_user_regs *r);
- void (*load_cpu_guest_regs)(
- struct vcpu *v, struct cpu_user_regs *r);
-
/* save and load hvm guest cpu context for save/restore */
void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
void hvm_send_assist_req(struct vcpu *v);
-static inline void
-hvm_store_cpu_guest_regs(
- struct vcpu *v, struct cpu_user_regs *r)
-{
- hvm_funcs.store_cpu_guest_regs(v, r);
-}
-
-static inline void
-hvm_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
-{
- hvm_funcs.load_cpu_guest_regs(v, r);
-}
-
void hvm_set_guest_time(struct vcpu *v, u64 gtime);
u64 hvm_get_guest_time(struct vcpu *v);
static inline int
hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
{
+ ASSERT(v == current);
return hvm_funcs.interrupts_enabled(v, type);
}
static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
+ ASSERT(v == current);
return hvm_funcs.guest_x86_mode(v);
}
return index;
}
-
-
-static void inline __update_guest_eip(
- struct vmcb_struct *vmcb, int inst_len)
-{
- ASSERT(inst_len > 0);
- vmcb->rip += inst_len;
- vmcb->rflags &= ~X86_EFLAGS_RF;
-}
-
#endif /* __ASM_X86_HVM_SVM_EMULATE_H__ */
/*
#define ARCH_HVM_IO_WAIT 1 /* Waiting for I/O completion */
-#define HVM_CONTEXT_STACK_BYTES (offsetof(struct cpu_user_regs, error_code))
+#define HVM_CONTEXT_STACK_BYTES (offsetof(struct cpu_user_regs, ss))
#endif /* __ASM_X86_HVM_VCPU_H__ */
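The new HVM_CONTEXT_STACK_BYTES cutoff assumes that eip, cs, eflags and esp sit between error_code and ss in struct cpu_user_regs, so the wider copy now carries the fields that have just become valid in guest_cpu_user_regs(). A build-time check along the following lines (a sketch, not part of the patch; BUILD_BUG_ON is the usual Xen idiom for this) would document that layout assumption:

static inline void hvm_context_stack_bytes_check(void)
{
    /* Everything the I/O emulation paths copy must lie below ss. */
    BUILD_BUG_ON(offsetof(struct cpu_user_regs, eip) >=
                 offsetof(struct cpu_user_regs, ss));
    BUILD_BUG_ON(offsetof(struct cpu_user_regs, eflags) >=
                 offsetof(struct cpu_user_regs, ss));
    BUILD_BUG_ON(offsetof(struct cpu_user_regs, esp) >=
                 offsetof(struct cpu_user_regs, ss));
}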